From eddd8e39554e06e792d5fe5bb5dbeed8dbeceaf6 Mon Sep 17 00:00:00 2001 From: Keir Fraser <keir.fraser@citrix.com> Date: Wed, 3 Feb 2010 09:35:23 +0000 Subject: [PATCH] xentrace: Trace p2m events Add more tracing to aid in debugging ballooning / PoD: * Nested page faults for EPT/NPT systems * set_p2m_entry * Decrease reservation (for ballooning) * PoD populate, zero reclaim, superpage splinter Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com> --- xen/arch/x86/hvm/svm/svm.c | 16 ++++++++ xen/arch/x86/hvm/vmx/vmx.c | 16 ++++++++ xen/arch/x86/mm/p2m.c | 76 +++++++++++++++++++++++++++++++++++++- xen/common/memory.c | 15 ++++++++ xen/include/public/trace.h | 8 ++++ 5 files changed, 130 insertions(+), 1 deletion(-) diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 8b12d515f9..8ef1bdb463 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -893,6 +893,22 @@ static void svm_do_nested_pgfault(paddr_t gpa) mfn_t mfn; p2m_type_t p2mt; + if ( tb_init_done ) + { + struct { + uint64_t gpa; + uint64_t mfn; + u32 qualification; + u32 p2mt; + } _d; + + _d.gpa = gpa; + _d.qualification = 0; + _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt)); + + __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d); + } + if ( hvm_hap_nested_page_fault(gfn) ) return; diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 8cf971a45c..66c889bfef 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -2100,6 +2100,22 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa) mfn_t mfn; p2m_type_t p2mt; + if ( tb_init_done ) + { + struct { + uint64_t gpa; + uint64_t mfn; + u32 qualification; + u32 p2mt; + } _d; + + _d.gpa = gpa; + _d.qualification = qualification; + _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt)); + + __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d); + } + if ( (qualification & EPT_GLA_VALID) && hvm_hap_nested_page_fault(gfn) ) return; diff --git a/xen/arch/x86/mm/p2m.c 
b/xen/arch/x86/mm/p2m.c index e57ce18d6d..5c2e12dd37 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -829,6 +829,21 @@ p2m_pod_zero_check_superpage(struct domain *d, unsigned long gfn) goto out_reset; } + if ( tb_init_done ) + { + struct { + u64 gfn, mfn; + int d:16,order:16; + } t; + + t.gfn = gfn; + t.mfn = mfn_x(mfn); + t.d = d->domain_id; + t.order = 9; + + __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t); + } + /* Finally! We've passed all the checks, and can add the mfn superpage * back on the PoD cache, and account for the new p2m PoD entries */ p2m_pod_cache_add(d, mfn_to_page(mfn0), 9); @@ -928,6 +943,21 @@ p2m_pod_zero_check(struct domain *d, unsigned long *gfns, int count) } else { + if ( tb_init_done ) + { + struct { + u64 gfn, mfn; + int d:16,order:16; + } t; + + t.gfn = gfns[i]; + t.mfn = mfn_x(mfns[i]); + t.d = d->domain_id; + t.order = 0; + + __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t); + } + /* Add to cache, and account for the new p2m PoD entry */ p2m_pod_cache_add(d, mfn_to_page(mfns[i]), 0); d->arch.p2m->pod.entry_count++; @@ -1073,6 +1103,21 @@ p2m_pod_demand_populate(struct domain *d, unsigned long gfn, p2md->pod.entry_count -= (1 << order); /* Lock: p2m */ BUG_ON(p2md->pod.entry_count < 0); + if ( tb_init_done ) + { + struct { + u64 gfn, mfn; + int d:16,order:16; + } t; + + t.gfn = gfn; + t.mfn = mfn_x(mfn); + t.d = d->domain_id; + t.order = order; + + __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t); + } + return 0; out_of_memory: spin_unlock(&d->page_alloc_lock); @@ -1091,6 +1136,18 @@ remap_and_retry: for(i=0; i<(1<<order); i++) set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0, p2m_populate_on_demand); + if ( tb_init_done ) + { + struct { + u64 gfn; + int d:16; + } t; + + t.gfn = gfn; + t.d = d->domain_id; + + __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t); + } return 0; } @@ -1141,6 +1198,23 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, l2_pgentry_t l2e_content; int rv=0; + if ( tb_init_done ) + { + struct { + u64 gfn, mfn; + int p2mt; + int d:16,order:16; + } t; + + t.gfn = gfn; 
+ t.mfn = mfn_x(mfn); + t.p2mt = p2mt; + t.d = d->domain_id; + t.order = page_order; + + __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t); + } + #if CONFIG_PAGING_LEVELS >= 4 if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn, L4_PAGETABLE_SHIFT - PAGE_SHIFT, @@ -1225,7 +1299,7 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn, /* Success */ rv = 1; - out: +out: unmap_domain_page(table); return rv; } diff --git a/xen/common/memory.c b/xen/common/memory.c index 329483756a..b1db5f5888 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -28,6 +28,7 @@ #include #include #include +#include <xen/trace.h> struct memop_args { /* INPUT */ @@ -222,6 +223,20 @@ static void decrease_reservation(struct memop_args *a) if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) ) goto out; + if ( tb_init_done ) + { + struct { + u64 gfn; + int d:16,order:16; + } t; + + t.gfn = gmfn; + t.d = a->domain->domain_id; + t.order = a->extent_order; + + __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t); + } + /* See if populate-on-demand wants to handle this */ if ( is_hvm_domain(a->domain) && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) ) diff --git a/xen/include/public/trace.h b/xen/include/public/trace.h index b6f112e201..9385cb75ff 100644 --- a/xen/include/public/trace.h +++ b/xen/include/public/trace.h @@ -82,6 +82,12 @@ #define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1) #define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2) #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3) +#define TRC_MEM_SET_P2M_ENTRY (TRC_MEM + 4) +#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5) +#define TRC_MEM_POD_POPULATE (TRC_MEM + 16) +#define TRC_MEM_POD_ZERO_RECLAIM (TRC_MEM + 17) +#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18) + #define TRC_PV_HYPERCALL (TRC_PV + 1) #define TRC_PV_TRAP (TRC_PV + 3) @@ -149,6 +155,8 @@ #define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19) #define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + 
TRC_64_FLAG + 0x19) #define TRC_HVM_INTR_WINDOW (TRC_HVM_HANDLER + 0x20) +#define TRC_HVM_NPF (TRC_HVM_HANDLER + 0x21) + #define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216) #define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217) -- 2.30.2